On Xen 3.0.2 and older, the hypervisor requires the guest to explicitly
specify _PAGE_USER in kernel mappings; newer hypervisors set the bit
implicitly. Detect this at boot, gated by CONFIG_XEN_COMPAT_030002, and
fold the result into the kernel page-flag macros via __kernel_page_user.
Original patch by Jan Beulich <jbeulich@novell.com> and Gerd Hoffmann.
Signed-off-by: Keir Fraser <keir@xensource.com>
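
For reference (an annotation of ours, not part of the patch):
HYPERVISOR_xen_version(XENVER_version, NULL) returns the running
hypervisor's version packed as (major << 16) | minor, so the 0x30000
compared against in the probe below corresponds to Xen 3.0.x. A minimal
sketch of the gate, with a helper name of our own choosing:

	/* Sketch only: the version check that gates the boot-time probe.
	 * 0x30000 == (3 << 16) | 0, i.e. Xen 3.0.x or older. */
	static inline int xen_may_need_explicit_page_user(void)
	{
		return HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000;
	}

The probe itself still runs on any matching hypervisor; the version
check merely skips it on hypervisors known not to need it.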
return NULL;
area->phys_addr = phys_addr;
addr = (void __iomem *) area->addr;
- flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+ flags |= _KERNPG_TABLE;
if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
phys_addr>>PAGE_SHIFT,
size, __pgprot(flags), domid)) {
struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);
+#ifdef CONFIG_XEN_COMPAT_030002
+unsigned int __kernel_page_user;
+EXPORT_SYMBOL(__kernel_page_user);
+#endif
+
extern unsigned long *contiguous_bitmap;
static unsigned long dma_reserve __initdata;
addr = page[pud_index(__START_KERNEL_map)];
addr_to_page(addr, page);
+#ifdef CONFIG_XEN_COMPAT_030002
+ /* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
+ in kernel PTEs. We check that here. */
+ if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
+ unsigned long *pg;
+ pte_t pte;
+
+ /* Mess with the initial mapping of page 0. It's not needed. */
+ BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
+ addr = page[pmd_index(__START_KERNEL_map)];
+ addr_to_page(addr, pg);
+ pte.pte = pg[pte_index(__START_KERNEL_map)];
+ BUG_ON(!(pte.pte & _PAGE_PRESENT));
+
+ /* If _PAGE_USER isn't set, we obviously do not need it. */
+ if (pte.pte & _PAGE_USER) {
+ /* _PAGE_USER is needed, but is it set implicitly? */
+ pte.pte &= ~_PAGE_USER;
+ if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
+ pte, 0) != 0) ||
+ !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
+ /* We need to explicitly specify _PAGE_USER. */
+ __kernel_page_user = _PAGE_USER;
+ }
+ }
+#endif
+
/* Construct mapping of initial pte page in our own directories. */
init_level4_pgt[pgd_index(__START_KERNEL_map)] =
mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
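
To restate the probe's decision logic (our summary; the patch open-codes
it above): the kernel clears _PAGE_USER in a throwaway PTE and asks the
hypervisor to install it. If the update is refused, or if it succeeds
but the bit does not reappear, the hypervisor will not supply the bit
implicitly and the guest must set it explicitly. A hedged sketch, with a
hypothetical helper name:

	/* Sketch only: mirrors the open-coded probe above. 'va' is a
	 * mapped-but-unneeded kernel address, 'ptep' the guest's view of
	 * its PTE. Returns the value to store in __kernel_page_user. */
	static unsigned int probe_kernel_page_user(unsigned long va,
						   unsigned long *ptep)
	{
		pte_t pte;

		pte.pte = *ptep & ~_PAGE_USER;
		/* Try to install the PTE with _PAGE_USER cleared. */
		if (HYPERVISOR_update_va_mapping(va, pte, 0) != 0)
			return _PAGE_USER;	/* refused: set it ourselves */
		/* If the hypervisor forced the bit back on, it handles the
		 * quirk transparently and the guest need not bother. */
		return (*ptep & _PAGE_USER) ? 0 : _PAGE_USER;
	}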
#define _PAGE_PROTNONE 0x080 /* If not present */
#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
+#ifdef CONFIG_XEN_COMPAT_030002
+extern unsigned int __kernel_page_user;
+#else
+#define __kernel_page_user 0
+#endif
+
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
#define __PAGE_KERNEL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
#define __PAGE_KERNEL_VSYSCALL \
(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
can temporarily clear it. */
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+#define pmd_bad(x) ((pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
+ != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
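
One consequence worth spelling out (our annotation): because
_KERNPG_TABLE can now carry __kernel_page_user, pmd_bad() must ignore
_PAGE_USER on both sides of its comparison. The rewritten mask also
differs from the old ~PAGE_MASK form in that ~(PTE_MASK | ...)
additionally covers bits above the physical frame number, such as NX. A
hedged restatement as a standalone helper (name ours):

	/* Sketch only: equivalent to the new pmd_bad() test. Every flag
	 * bit, including NX, must match _KERNPG_TABLE, except USER and
	 * PRESENT, which may legitimately vary. */
	static inline int pmd_flags_bad(pmd_t pmd)
	{
		return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT))
		       != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT));
	}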